}
/* Allocate page table, return its machine address */
-u64 alloc_pgtable_maddr(struct domain *d)
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
{
struct page_info *pg;
u64 *vaddr;
- pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
+ pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
+ d ? MEMF_node(domain_to_node(d)) : 0);
+ if ( !pg )
+ return 0;
vaddr = map_domain_page(page_to_mfn(pg));
if ( !vaddr )
return 0;
- memset(vaddr, 0, PAGE_SIZE);
+ memset(vaddr, 0, PAGE_SIZE * npages);
- iommu_flush_cache_page(vaddr);
+ iommu_flush_cache_page(vaddr, npages);
unmap_domain_page(vaddr);
ir_ctrl = iommu_ir_ctrl(iommu);
if ( ir_ctrl->iremap_maddr == 0 )
{
- ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL);
+ ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL, 1);
if ( ir_ctrl->iremap_maddr == 0 )
{
dprintk(XENLOG_WARNING VTDPREFIX,
__iommu_flush_cache(addr, 8);
}
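+/*
+ * Flush the CPU cache lines backing npages 4K pages of IOMMU data
+ * structures (assuming __iommu_flush_cache() walks the given byte range
+ * one cache line at a time via cacheline_flush(), so that a
+ * non-cache-coherent IOMMU observes the updated entries).
+ */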
-void iommu_flush_cache_page(void *addr)
+void iommu_flush_cache_page(void *addr, unsigned long npages)
{
- __iommu_flush_cache(addr, PAGE_SIZE_4K);
+ __iommu_flush_cache(addr, PAGE_SIZE_4K * npages);
}
int nr_iommus;
root = &root_entries[bus];
if ( !root_present(*root) )
{
- maddr = alloc_pgtable_maddr(NULL);
+ maddr = alloc_pgtable_maddr(NULL, 1);
if ( maddr == 0 )
{
unmap_vtd_domain_page(root_entries);
addr &= (((u64)1) << addr_width) - 1;
ASSERT(spin_is_locked(&hd->mapping_lock));
if ( hd->pgd_maddr == 0 )
- if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain)) == 0) )
+ if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain, 1)) == 0) )
goto out;
parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
{
if ( !alloc )
break;
- maddr = alloc_pgtable_maddr(domain);
+ maddr = alloc_pgtable_maddr(domain, 1);
if ( !maddr )
break;
dma_set_pte_addr(*pte, maddr);
spin_lock(&iommu->lock);
if ( iommu->root_maddr == 0 )
- iommu->root_maddr = alloc_pgtable_maddr(NULL);
+ iommu->root_maddr = alloc_pgtable_maddr(NULL, 1);
if ( iommu->root_maddr == 0 )
{
spin_unlock(&iommu->lock);
u32 udata;
};
-#define QINVAL_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct qinval_entry))
+#define MAX_QINVAL_PAGES 8
+#define NUM_QINVAL_PAGES 1
+#define QINVAL_ENTRY_NR (PAGE_SIZE_4K*NUM_QINVAL_PAGES/sizeof(struct qinval_entry))
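+/*
+ * Worked sizing example (assuming each struct qinval_entry is 16 bytes,
+ * i.e. one 128-bit VT-d invalidation descriptor):
+ *   NUM_QINVAL_PAGES = 1 -> QINVAL_ENTRY_NR = 4096 * 1 / 16 = 256 entries
+ *   NUM_QINVAL_PAGES = 8 -> QINVAL_ENTRY_NR = 4096 * 8 / 16 = 2048 entries
+ * MAX_QINVAL_PAGES caps how far NUM_QINVAL_PAGES may be raised.
+ */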
#define qinval_present(v) ((v).lo & 1)
#define qinval_fault_disable(v) (((v).lo >> 1) & 1)
if ( qi_ctrl->qinval_maddr == 0 )
{
- qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL);
+ qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);
if ( qi_ctrl->qinval_maddr == 0 )
{
dprintk(XENLOG_WARNING VTDPREFIX,
* registers are automatically reset to 0 with write
* to IQA register.
*/
+ /* The QS field (bits 2:0) of the IQA register encodes the queue size
+  * as 2^QS 4KB pages, i.e. the allocation order, not the page count. */
+ if ( NUM_QINVAL_PAGES <= MAX_QINVAL_PAGES )
+ qi_ctrl->qinval_maddr |= get_order_from_pages(NUM_QINVAL_PAGES);
dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
/* enable queued invalidation hardware */
void cacheline_flush(char *);
void flush_all_cache(void);
void *map_to_nocache_virt(int nr_iommus, u64 maddr);
-u64 alloc_pgtable_maddr(struct domain *d);
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages);
void free_pgtable_maddr(u64 maddr);
void *map_vtd_domain_page(u64 maddr);
void unmap_vtd_domain_page(void *va);
void iommu_flush_cache_entry(void *addr);
-void iommu_flush_cache_page(void *addr);
+void iommu_flush_cache_page(void *addr, unsigned long npages);
#endif // _VTD_H_
}
/* Allocate page table, return its machine address */
-u64 alloc_pgtable_maddr(struct domain *d)
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
{
struct page_info *pg;
u64 *vaddr;
unsigned long mfn;
- pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
+ pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
+ d ? MEMF_node(domain_to_node(d)) : 0);
if ( !pg )
return 0;
mfn = page_to_mfn(pg);
vaddr = map_domain_page(mfn);
- memset(vaddr, 0, PAGE_SIZE);
+ memset(vaddr, 0, PAGE_SIZE * npages);
- iommu_flush_cache_page(vaddr);
+ iommu_flush_cache_page(vaddr, npages);
unmap_domain_page(vaddr);
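+ /* Return the machine address of the first page of the allocation.
+  * get_order_from_pages() rounds npages up to the next power of two and
+  * alloc_domheap_pages() returns that many physically contiguous pages,
+  * which multi-page users such as the invalidation queue rely on. */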
return (u64)mfn << PAGE_SHIFT_4K;